"loss tick" occationally and APIC time calibration issue.
Signed-off-by: Xiaowei Yang <xiaowei.yang@intel.com>
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
+/* Fold ticks that elapsed since the last scheduled deadline into
+ * pending_intr_nr, and advance ->scheduled past them so the next
+ * set_timer() re-arms at the correct future deadline. */
static __inline__ void missed_ticks(struct hvm_virpit*vpit)
{
- int missed_ticks;
+ int missed_ticks;
missed_ticks = (NOW() - vpit->scheduled)/(s_time_t) vpit->period;
- if ( missed_ticks > 0 ) {
+ if ( missed_ticks > 0 ) {
vpit->pending_intr_nr += missed_ticks;
vpit->scheduled += missed_ticks * vpit->period;
}
/* pick up missed timer tick */
missed_ticks(vpit);
-
- vpit->pending_intr_nr++;
if ( test_bit(_VCPUF_running, &v->vcpu_flags) ) {
- vpit->scheduled += vpit->period;
set_timer(&vpit->pit_timer, vpit->scheduled);
}
}
+/* Pick up PIT ticks missed while the vcpu was deactivated: if the
+ * periodic timer is not currently armed, account the elapsed ticks
+ * and re-arm it. */
void pickup_deactive_ticks(struct hvm_virpit *vpit)
{
-
if ( !active_timer(&(vpit->pit_timer)) ) {
- /* pick up missed timer tick */
missed_ticks(vpit);
-
- vpit->scheduled += vpit->period;
+ /* missed_ticks() already advanced ->scheduled; re-arm at that deadline */
set_timer(&vpit->pit_timer, vpit->scheduled);
}
}
#define BSP_CPU(v) (!(v->vcpu_id))
-void vmx_set_tsc_shift(struct vcpu *v, struct hvm_virpit *vpit)
+/* Write a raw guest/host TSC offset into the VMCS TSC_OFFSET field.
+ * On i386 the field is written as two 32-bit halves, so the high
+ * 32 bits go into TSC_OFFSET_HIGH separately. */
+static inline
+void __set_tsc_offset(u64 offset)
{
- u64 drift;
-
- if ( vpit->first_injected )
- drift = vpit->period_cycles * vpit->pending_intr_nr;
- else
- drift = 0;
- vpit->shift = v->arch.hvm_vmx.tsc_offset - drift;
- __vmwrite(TSC_OFFSET, vpit->shift);
-
+ __vmwrite(TSC_OFFSET, offset);
#if defined (__i386__)
- __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
+ __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
#endif
}
+/* Return the guest's current TSC value: current host TSC plus the
+ * cached per-domain guest/host TSC offset. */
+u64 get_guest_time(struct vcpu *v)
+{
+ struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
+ u64 host_tsc;
+
+ rdtscll(host_tsc);
+ return host_tsc + vpit->cache_tsc_offset;
+}
+
+/* Make the guest TSC read 'gtime' from now on: recompute the offset
+ * against the current host TSC, cache it in the vpit, and push it
+ * into the VMCS via __set_tsc_offset(). */
+void set_guest_time(struct vcpu *v, u64 gtime)
+{
+ struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
+ u64 host_tsc;
+
+ rdtscll(host_tsc);
+
+ vpit->cache_tsc_offset = gtime - host_tsc;
+ __set_tsc_offset(vpit->cache_tsc_offset);
+}
+
+
static inline void
interrupt_post_injection(struct vcpu * v, int vector, int type)
{
if ( is_pit_irq(v, vector, type) ) {
if ( !vpit->first_injected ) {
vpit->pending_intr_nr = 0;
+ vpit->last_pit_gtime = get_guest_time(v);
vpit->scheduled = NOW() + vpit->period;
set_timer(&vpit->pit_timer, vpit->scheduled);
vpit->first_injected = 1;
vpit->pending_intr_nr--;
}
vpit->inject_point = NOW();
- vmx_set_tsc_shift (v, vpit);
+
+ vpit->last_pit_gtime += vpit->period;
+ set_guest_time(v, vpit->last_pit_gtime);
}
switch(type)
vmx_stts();
+ /* pick up the elapsed PIT ticks and re-enable pit_timer */
+ if ( vpit->first_injected) {
+ set_guest_time(v, v->domain->arch.hvm_domain.guest_time);
+ pickup_deactive_ticks(vpit);
+ }
+
if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
hvm_wait_io();
- /* pick up the elapsed PIT ticks and re-enable pit_timer */
- if ( vpit->first_injected )
- pickup_deactive_ticks(vpit);
- vmx_set_tsc_shift(v, vpit);
-
/* We can't resume the guest if we're waiting on I/O */
ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));
}
/* Update CR3, GDT, LDT, TR */
unsigned int error = 0;
unsigned long cr0, cr4;
- u64 host_tsc;
if (v->vcpu_id == 0)
hvm_setup_platform(v->domain);
v->arch.hvm_vmx.launch_cpu = smp_processor_id();
/* init guest tsc to start from 0 */
- rdtscll(host_tsc);
- v->arch.hvm_vmx.tsc_offset = 0 - host_tsc;
- vmx_set_tsc_shift(v, &v->domain->arch.hvm_domain.vpit);
+ set_guest_time(v, 0);
}
/*
#endif /* __i386__ */
+/* Snapshot the domain's guest time and stop the PIT timer; called on
+ * context-switch-out (vmx_ctxt_switch_from). The timer is stopped only
+ * once the first PIT interrupt has been injected (first_injected). */
+static void vmx_freeze_time(struct vcpu *v)
+{
+ struct hvm_virpit *vpit = &v->domain->arch.hvm_domain.vpit;
+
+ v->domain->arch.hvm_domain.guest_time = get_guest_time(v);
+ if ( vpit->first_injected )
+ stop_timer(&(vpit->pit_timer));
+}
+
static void vmx_ctxt_switch_from(struct vcpu *v)
{
+ /* snapshot guest time and stop the PIT timer before deschedule */
+ vmx_freeze_time(v);
vmx_save_segments(v);
vmx_load_msrs();
}
rdtscll(msr_content);
vpit = &(v->domain->arch.hvm_domain.vpit);
- msr_content += vpit->shift;
+ msr_content += vpit->cache_tsc_offset;
break;
}
case MSR_IA32_SYSENTER_CS:
switch (regs->ecx) {
case MSR_IA32_TIME_STAMP_COUNTER:
- {
- struct hvm_virpit *vpit;
- u64 host_tsc, drift;
-
- rdtscll(host_tsc);
- vpit = &(v->domain->arch.hvm_domain.vpit);
- drift = v->arch.hvm_vmx.tsc_offset - vpit->shift;
- vpit->shift = msr_content - host_tsc;
- v->arch.hvm_vmx.tsc_offset = vpit->shift + drift;
- __vmwrite(TSC_OFFSET, vpit->shift);
-
-#if defined (__i386__)
- __vmwrite(TSC_OFFSET_HIGH, ((vpit->shift)>>32));
-#endif
+ set_guest_time(v, msr_content);
break;
- }
case MSR_IA32_SYSENTER_CS:
__vmwrite(GUEST_SYSENTER_CS, msr_content);
break;
unsigned int pae_enabled;
struct hvm_virpit vpit;
+ u64 guest_time;
struct hvm_virpic vpic;
struct hvm_vioapic vioapic;
struct hvm_io_handler io_handler;
unsigned long cpu_based_exec_control;
struct vmx_msr_state msr_content;
void *io_bitmap_a, *io_bitmap_b;
- u64 tsc_offset;
struct timer hlt_timer; /* hlt ins emulation wakeup timer */
};
extern void vmx_asm_do_resume(void);
extern void vmx_asm_do_launch(void);
extern void vmx_intr_assist(void);
-extern void vmx_set_tsc_shift(struct vcpu *, struct hvm_virpit *);
extern void vmx_migrate_timers(struct vcpu *v);
extern void arch_vmx_do_launch(struct vcpu *);
extern void arch_vmx_do_resume(struct vcpu *);
+extern void set_guest_time(struct vcpu *v, u64 gtime);
+extern u64 get_guest_time(struct vcpu *v);
extern unsigned int cpu_rev;
struct hvm_virpit {
/* for simulation of counter 0 in mode 2 */
u64 period_cycles; /* pit frequency in cpu cycles */
- u64 shift; /* save the value of offset - drift */
s_time_t inject_point; /* the time inject virt intr */
s_time_t scheduled; /* scheduled timer interrupt */
struct timer pit_timer; /* periodic timer for mode 2*/
unsigned int pending_intr_nr; /* the couner for pending timer interrupts */
u32 period; /* pit frequency in ns */
int first_injected; /* flag to prevent shadow window */
+ s64 cache_tsc_offset; /* cache of VMCS TSC_OFFSET offset */
+ u64 last_pit_gtime; /* guest time when last pit is injected */
/* virtual PIT state for handle related I/O */
int read_state;